From 66467b424705135e613bb8f86f1f365e65731900 Mon Sep 17 00:00:00 2001
From: "kaf24@scramble.cl.cam.ac.uk" <kaf24@scramble.cl.cam.ac.uk>
Date: Tue, 27 Jul 2004 23:40:31 +0000
Subject: [PATCH] bitkeeper revision 1.1108.28.1 (4106e7efzfLYJJxhDUfFLjrg1-JKEw)

Finish merge of old domain allocation code with the buddy-allocator
system. Now just needs exporting to privileged guest OSes so they can
allocate multi-page physmem chunks. Then to fix Linux's
pci_alloc_consistent(). (A usage sketch of the new interface is
appended below the patch.)
---
 xen/arch/x86/domain.c    |   2 +-
 xen/arch/x86/shadow.c    |   2 +-
 xen/common/dom_mem_ops.c |   2 +-
 xen/common/domain.c      |  94 +---------------------------------
 xen/common/page_alloc.c  | 108 +++++++++++++++++++++++++++++++++++++--
 xen/include/asm-x86/mm.h |   5 +-
 xen/include/xen/mm.h     |   9 ++--
 7 files changed, 116 insertions(+), 106 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 73a838d6fb..6f6b7b620f 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -476,7 +476,7 @@ void domain_relinquish_memory(struct domain *d)
         put_page(page);
 
     /* Relinquish all pages on the domain's allocation list. */
-    spin_lock_recursive(&d->page_alloc_lock); /* may enter free_domain_page */
+    spin_lock_recursive(&d->page_alloc_lock); /* may enter free_domheap_page */
     list_for_each_safe ( ent, tmp, &d->page_list )
     {
         page = list_entry(ent, struct pfn_info, list);
diff --git a/xen/arch/x86/shadow.c b/xen/arch/x86/shadow.c
index a98cf3076c..362b0f4560 100644
--- a/xen/arch/x86/shadow.c
+++ b/xen/arch/x86/shadow.c
@@ -512,7 +512,7 @@ int shadow_mode_control(struct domain *d, dom0_shadow_control_t *sc)
 static inline struct pfn_info *alloc_shadow_page(struct mm_struct *m)
 {
     m->shadow_page_count++;
-    return alloc_domheap_page();
+    return alloc_domheap_page(NULL);
 }
 
 void unshadow_table( unsigned long gpfn, unsigned int type )
diff --git a/xen/common/dom_mem_ops.c b/xen/common/dom_mem_ops.c
index 864ea3a4c1..240eb60086 100644
--- a/xen/common/dom_mem_ops.c
+++ b/xen/common/dom_mem_ops.c
@@ -24,7 +24,7 @@ static long alloc_dom_mem(struct domain *d,
 
     for ( i = 0; i < nr_pages; i++ )
     {
-        if ( unlikely((page = alloc_domain_page(d)) == NULL) )
+        if ( unlikely((page = alloc_domheap_page(d)) == NULL) )
         {
             DPRINTK("Could not allocate a frame\n");
             break;
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 888c88f983..8f12c624a8 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -15,7 +15,6 @@
 #include
 #include
 #include
-#include
 #include
 
 /* Both these structures are protected by the tasklist_lock. */
@@ -194,97 +193,6 @@ void domain_shutdown(u8 reason)
     __enter_scheduler();
 }
 
-struct pfn_info *alloc_domain_page(struct domain *d)
-{
-    struct pfn_info *page = NULL;
-    unsigned long mask, pfn_stamp, cpu_stamp;
-    int i;
-
-    ASSERT(!in_irq());
-
-    page = alloc_domheap_page();
-    if ( unlikely(page == NULL) )
-        return NULL;
-
-    if ( (mask = page->u.free.cpu_mask) != 0 )
-    {
-        pfn_stamp = page->tlbflush_timestamp;
-        for ( i = 0; (mask != 0) && (i < smp_num_cpus); i++ )
-        {
-            if ( mask & (1<<i) )
-            {
-                cpu_stamp = tlbflush_time[i];
-                if ( NEED_FLUSH(cpu_stamp, pfn_stamp) )
-                {
-                    perfc_incrc(need_flush_tlb_flush);
-                    flush_tlb_cpu(i);
-                }
-                mask &= ~(1<<i);
-            }
-        }
-    }
-
-    page->u.inuse.domain = d;
-    page->u.inuse.type_info = 0;
-    if ( d != NULL )
-    {
-        wmb(); /* Domain pointer must be visible before updating refcnt. */
-        spin_lock(&d->page_alloc_lock);
-        if ( unlikely(d->tot_pages >= d->max_pages) )
-        {
-            DPRINTK("Over-allocation for domain %u: %u >= %u\n",
-                    d->domain, d->tot_pages, d->max_pages);
-            spin_unlock(&d->page_alloc_lock);
-            page->u.inuse.domain = NULL;
-            goto free_and_exit;
-        }
-        list_add_tail(&page->list, &d->page_list);
-        page->u.inuse.count_info = PGC_allocated | 1;
-        if ( unlikely(d->tot_pages++ == 0) )
-            get_domain(d);
-        spin_unlock(&d->page_alloc_lock);
-    }
-
-    return page;
-
- free_and_exit:
-    free_domheap_page(page);
-    return NULL;
-}
-
-void free_domain_page(struct pfn_info *page)
-{
-    int drop_dom_ref;
-    struct domain *d = page->u.inuse.domain;
-
-    if ( unlikely(IS_XEN_HEAP_FRAME(page)) )
-    {
-        spin_lock_recursive(&d->page_alloc_lock);
-        drop_dom_ref = (--d->xenheap_pages == 0);
-        spin_unlock_recursive(&d->page_alloc_lock);
-    }
-    else
-    {
-        page->tlbflush_timestamp = tlbflush_clock;
-        page->u.free.cpu_mask = 1 << d->processor;
-
-        /* NB. May recursively lock from domain_relinquish_memory(). */
-        spin_lock_recursive(&d->page_alloc_lock);
-        list_del(&page->list);
-        drop_dom_ref = (--d->tot_pages == 0);
-        spin_unlock_recursive(&d->page_alloc_lock);
-
-        page->u.inuse.count_info = 0;
-
-        free_domheap_page(page);
-    }
-
-    if ( drop_dom_ref )
-        put_domain(d);
-}
-
 unsigned int alloc_new_dom_mem(struct domain *d, unsigned int kbytes)
 {
     unsigned int alloc_pfns, nr_pages;
@@ -296,7 +204,7 @@ unsigned int alloc_new_dom_mem(struct domain *d, unsigned int kbytes)
     /* Grow the allocation if necessary. */
     for ( alloc_pfns = d->tot_pages; alloc_pfns < nr_pages; alloc_pfns++ )
     {
-        if ( unlikely((page=alloc_domain_page(d)) == NULL) )
+        if ( unlikely((page = alloc_domheap_page(d)) == NULL) )
         {
             domain_relinquish_memory(d);
             return -ENOMEM;
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 065f85088d..bc78bb266d 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -27,6 +27,7 @@
 #include
 #include
 #include
+#include
 
 
 /*********************
@@ -198,6 +199,9 @@ struct pfn_info *alloc_heap_pages(int zone, int order)
     struct pfn_info *pg;
     unsigned long flags;
 
+    if ( unlikely(order < MIN_ORDER) || unlikely(order > MAX_ORDER) )
+        return NULL;
+
     spin_lock_irqsave(&heap_lock, flags);
 
     /* Find smallest order which can satisfy the request. */
 
@@ -331,18 +335,116 @@ void init_domheap_pages(unsigned long ps, unsigned long pe)
     init_heap_pages(MEMZONE_DOM, phys_to_page(ps), (pe - ps) >> PAGE_SHIFT);
 }
 
-struct pfn_info *alloc_domheap_pages(int order)
+struct pfn_info *alloc_domheap_pages(struct domain *d, int order)
 {
-    struct pfn_info *pg = alloc_heap_pages(MEMZONE_DOM, order);
+    struct pfn_info *pg;
+    unsigned long mask, flushed_mask, pfn_stamp, cpu_stamp;
+    int i, j;
+
+    ASSERT(!in_irq());
+
+    if ( unlikely((pg = alloc_heap_pages(MEMZONE_DOM, order)) == NULL) )
+        return NULL;
+
+    flushed_mask = 0;
+    for ( i = 0; i < (1 << order); i++ )
+    {
+        pg[i].u.inuse.domain = NULL;
+        pg[i].u.inuse.type_info = 0;
+
+        if ( (mask = (pg[i].u.free.cpu_mask & ~flushed_mask)) != 0 )
+        {
+            pfn_stamp = pg[i].tlbflush_timestamp;
+            for ( j = 0; (mask != 0) && (j < smp_num_cpus); j++ )
+            {
+                if ( mask & (1<<j) )
+                {
+                    cpu_stamp = tlbflush_time[j];
+                    if ( NEED_FLUSH(cpu_stamp, pfn_stamp) )
+                    {
+                        perfc_incrc(need_flush_tlb_flush);
+                        flush_tlb_cpu(j);
+                        flushed_mask |= 1 << j;
+                    }
+                    mask &= ~(1 << j);
+                }
+            }
+        }
+    }
+
+    if ( d == NULL )
+        return pg;
+
+    spin_lock(&d->page_alloc_lock);
+
+    if ( unlikely((d->tot_pages + (1 << order)) > d->max_pages) )
+    {
+        DPRINTK("Over-allocation for domain %u: %u > %u\n",
+                d->domain, d->tot_pages + (1 << order), d->max_pages);
+        spin_unlock(&d->page_alloc_lock);
+        free_heap_pages(MEMZONE_DOM, pg, order);
+        return NULL;
+    }
+
+    if ( unlikely(d->tot_pages == 0) )
+        get_domain(d);
+
+    d->tot_pages += 1 << order;
+
+    for ( i = 0; i < (1 << order); i++ )
+    {
+        pg[i].u.inuse.domain = d;
+        wmb(); /* Domain pointer must be visible before updating refcnt. */
+        pg[i].u.inuse.count_info = PGC_allocated | 1;
+        list_add_tail(&pg[i].list, &d->page_list);
+    }
+
+    spin_unlock(&d->page_alloc_lock);
 
     return pg;
 }
 
 void free_domheap_pages(struct pfn_info *pg, int order)
 {
-    free_heap_pages(MEMZONE_DOM, pg, order);
+    int i, drop_dom_ref;
+    struct domain *d = pg->u.inuse.domain;
+
+    if ( unlikely(IS_XEN_HEAP_FRAME(pg)) )
+    {
+        spin_lock_recursive(&d->page_alloc_lock);
+        d->xenheap_pages -= 1 << order;
+        drop_dom_ref = (d->xenheap_pages == 0);
+        spin_unlock_recursive(&d->page_alloc_lock);
+    }
+    else
+    {
+        /* NB. May recursively lock from domain_relinquish_memory(). */
+        spin_lock_recursive(&d->page_alloc_lock);
+
+        for ( i = 0; i < (1 << order); i++ )
+        {
+            pg[i].tlbflush_timestamp = tlbflush_clock;
+            pg[i].u.inuse.count_info = 0;
+            pg[i].u.free.cpu_mask = 1 << d->processor;
+            list_del(&pg[i].list);
+        }
+
+        d->tot_pages -= 1 << order;
+        drop_dom_ref = (d->tot_pages == 0);
+
+        spin_unlock_recursive(&d->page_alloc_lock);
+
+        free_heap_pages(MEMZONE_DOM, pg, order);
+    }
+
+    if ( drop_dom_ref )
+        put_domain(d);
 }
 
 unsigned long avail_domheap_pages(void)
 {
     return avail[MEMZONE_DOM];
 }
+
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 7f9c57cf62..35ad6ea7c9 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -114,9 +114,6 @@ extern unsigned long frame_table_size;
 extern unsigned long max_page;
 void init_frametable(void *frametable_vstart, unsigned long nr_pages);
 
-struct pfn_info *alloc_domain_page(struct domain *d);
-void free_domain_page(struct pfn_info *page);
-
 int alloc_page_type(struct pfn_info *page, unsigned int type);
 void free_page_type(struct pfn_info *page, unsigned int type);
 
@@ -131,7 +128,7 @@ static inline void put_page(struct pfn_info *page)
     while ( unlikely((y = cmpxchg(&page->u.inuse.count_info, x, nx)) != x) );
 
     if ( unlikely((nx & PGC_count_mask) == 0) )
-        free_domain_page(page);
+        free_domheap_page(page);
 }
 
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 7133af933d..859db7c9f1 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -2,7 +2,8 @@
 #ifndef __XEN_MM_H__
 #define __XEN_MM_H__
 
-#include <asm/mm.h>
+struct domain;
+struct pfn_info;
 
 /* Generic allocator */
 unsigned long init_heap_allocator(
@@ -20,10 +21,12 @@ void free_xenheap_pages(unsigned long p, int order);
 
 /* Domain suballocator */
 void init_domheap_pages(unsigned long ps, unsigned long pe);
-struct pfn_info *alloc_domheap_pages(int order);
+struct pfn_info *alloc_domheap_pages(struct domain *d, int order);
 void free_domheap_pages(struct pfn_info *pg, int order);
 unsigned long avail_domheap_pages(void);
-#define alloc_domheap_page() (alloc_domheap_pages(0))
+#define alloc_domheap_page(_d) (alloc_domheap_pages(_d,0))
 #define free_domheap_page(_p) (free_domheap_pages(_p,0))
 
+#include <asm/mm.h>
+
 #endif /* __XEN_MM_H__ */
-- 
2.30.2
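
For reference, below is a minimal usage sketch of the reworked interface this
patch exports: one call to alloc_domheap_pages() now returns a physically
contiguous 2^order-page chunk, which is what a privileged guest would need to
back Linux's pci_alloc_consistent(). The sketch is not part of the patch:
demo_contig_alloc() is a hypothetical caller, and only alloc_domheap_pages()
and free_domheap_pages() (declared in xen/include/xen/mm.h above) come from
the patched tree.

/* Hypothetical caller -- illustration only, not part of the patch. */
#include <xen/mm.h>     /* alloc_domheap_pages(), free_domheap_pages() */
#include <xen/errno.h>  /* assumed location of ENOMEM in this tree */
#include <xen/sched.h>  /* assumed location of struct domain */

static int demo_contig_alloc(struct domain *d, int order)
{
    struct pfn_info *pg;

    /*
     * Allocate 1 << order physically contiguous pages, accounted to
     * domain d. Passing d == NULL allocates anonymously, as
     * alloc_shadow_page() does in the patch above.
     */
    pg = alloc_domheap_pages(d, order);
    if ( pg == NULL )
        return -ENOMEM; /* heap exhausted, or d would exceed max_pages */

    /* ... use the chunk, e.g. as a DMA buffer that must be physically
     *     contiguous ... */

    /* Free the whole chunk; this drops d's reference count if these
     * were its last pages. */
    free_domheap_pages(pg, order);
    return 0;
}

Folding the domain argument into the allocator is what lets the old
alloc_domain_page()/free_domain_page() pair disappear: the per-domain
accounting (tot_pages, max_pages, get_domain/put_domain) and the stale-TLB
check now happen once in page_alloc.c for any order, instead of only for
single pages in domain.c.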